Parsing to an object of class QFeatures
Working from a .csv is likely to cause issues downstream. Indeed, we run
the risk of accidentally changing the data or corrupting the file in some way.
Secondly, all .csvs will be formatted slightly differently, and so making extensible
tools for these files will be inefficient. Furthermore, working with a generic
class used in other mass-spectrometry fields can speed up analysis and adoption
of new methods. We will use the class QFeatures from the QFeatures package,
as it is a powerful and scalable way to store quantitative mass-spectrometry data.
Firstly, the data is stored in long format rather than wide format. We first
switch the data to wide format.
# Reshape MBP from long to wide: one row per (peptide sequence, charge),
# one column per time/replicate/sample combination, filled with the
# deuterium values (d).
MBP_wide <- pivot_wider(
  data.frame(MBP),
  id_cols = c("pep_sequence", "pep_charge"),
  names_from = c("hx_time", "replicate_cnt", "hx_sample"),
  values_from = d
)
# Inspect the first rows of the wide table.
head(MBP_wide)
## # A tibble: 6 x 198
## pep_sequence pep_charge `30_1_10%` `30_2_10%` `30_3_10%` `30_4_10%` `30_5_10%`
## <chr> <int> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 VIWINGDKGYNG 2 2.12 2.15 2.14 NA NA
## 2 VIWINGDKGYN~ 2 2.12 2.12 2.11 NA NA
## 3 GDKGYNGLAEVG 3 0.552 0.555 0.553 NA NA
## 4 LAEVGKKFEKD~ 4 2.41 2.36 2.42 NA NA
## 5 AEVGKKFEKDT~ 4 0.458 0.425 0.573 NA NA
## 6 TVEHPDKL 3 1.43 1.44 1.44 NA NA
## # ... with 191 more variables: 30_6_10% <dbl>, 30_7_10% <dbl>, 240_1_10% <dbl>,
## # 240_2_10% <dbl>, 240_3_10% <dbl>, 240_4_10% <dbl>, 240_5_10% <dbl>,
## # 240_6_10% <dbl>, 240_7_10% <dbl>, 1800_1_10% <dbl>, 1800_2_10% <dbl>,
## # 1800_3_10% <dbl>, 1800_4_10% <dbl>, 1800_5_10% <dbl>, 1800_6_10% <dbl>,
## # 1800_7_10% <dbl>, 14400_1_10% <dbl>, 14400_2_10% <dbl>, 14400_3_10% <dbl>,
## # 14400_4_10% <dbl>, 14400_5_10% <dbl>, 14400_6_10% <dbl>, 14400_7_10% <dbl>,
## # 30_1_15% <dbl>, 30_2_15% <dbl>, 30_3_15% <dbl>, 30_4_15% <dbl>,
## # 30_5_15% <dbl>, 30_6_15% <dbl>, 30_7_15% <dbl>, 240_1_15% <dbl>,
## # 240_2_15% <dbl>, 240_3_15% <dbl>, 240_4_15% <dbl>, 240_5_15% <dbl>,
## # 240_6_15% <dbl>, 240_7_15% <dbl>, 1800_1_15% <dbl>, 1800_2_15% <dbl>,
## # 1800_3_15% <dbl>, 1800_4_15% <dbl>, 1800_5_15% <dbl>, 1800_6_15% <dbl>,
## # 1800_7_15% <dbl>, 14400_1_15% <dbl>, 14400_2_15% <dbl>, 14400_3_15% <dbl>,
## # 14400_4_15% <dbl>, 14400_5_15% <dbl>, 14400_6_15% <dbl>, 14400_7_15% <dbl>,
## # 30_1_20% <dbl>, 30_2_20% <dbl>, 30_3_20% <dbl>, 30_4_20% <dbl>,
## # 30_5_20% <dbl>, 30_6_20% <dbl>, 30_7_20% <dbl>, 240_1_20% <dbl>,
## # 240_2_20% <dbl>, 240_3_20% <dbl>, 240_4_20% <dbl>, 240_5_20% <dbl>,
## # 240_6_20% <dbl>, 240_7_20% <dbl>, 1800_1_20% <dbl>, 1800_2_20% <dbl>,
## # 1800_3_20% <dbl>, 1800_4_20% <dbl>, 1800_5_20% <dbl>, 1800_6_20% <dbl>,
## # 1800_7_20% <dbl>, 14400_1_20% <dbl>, 14400_2_20% <dbl>, 14400_3_20% <dbl>,
## # 14400_4_20% <dbl>, 14400_5_20% <dbl>, 14400_6_20% <dbl>, 14400_7_20% <dbl>,
## # 30_1_25% <dbl>, 30_2_25% <dbl>, 30_3_25% <dbl>, 30_4_25% <dbl>,
## # 30_5_25% <dbl>, 30_6_25% <dbl>, 30_7_25% <dbl>, 240_1_25% <dbl>,
## # 240_2_25% <dbl>, 240_3_25% <dbl>, 240_4_25% <dbl>, 240_5_25% <dbl>,
## # 240_6_25% <dbl>, 240_7_25% <dbl>, 1800_1_25% <dbl>, 1800_2_25% <dbl>,
## # 1800_3_25% <dbl>, 1800_4_25% <dbl>, 1800_5_25% <dbl>, 1800_6_25% <dbl>,
## # 1800_7_25% <dbl>, ...
We notice that there are many columns with NAs. The following code chunk removes
these columns.
MBP_wide <- MBP_wide[, colSums(is.na(MBP_wide)) != nrow(MBP_wide)]
We also note that the column names are not very informative. We are going to format
them in a very specific way so that later functions can automatically infer the design
from the column names. We provide names in the format X(time)rep(replicate)cond(condition).
colnames(MBP_wide)[-c(1,2)]
## [1] "30_1_10%" "30_2_10%" "30_3_10%" "240_1_10%"
## [5] "240_2_10%" "240_3_10%" "1800_1_10%" "1800_2_10%"
## [9] "1800_3_10%" "14400_1_10%" "14400_2_10%" "14400_3_10%"
## [13] "30_1_15%" "30_2_15%" "30_3_15%" "240_1_15%"
## [17] "240_2_15%" "240_3_15%" "1800_1_15%" "1800_2_15%"
## [21] "1800_3_15%" "14400_1_15%" "14400_2_15%" "14400_3_15%"
## [25] "30_1_20%" "30_2_20%" "30_3_20%" "240_1_20%"
## [29] "240_2_20%" "240_3_20%" "1800_1_20%" "1800_2_20%"
## [33] "1800_3_20%" "14400_1_20%" "14400_2_20%" "14400_3_20%"
## [37] "30_1_25%" "30_2_25%" "30_3_25%" "240_1_25%"
## [41] "240_2_25%" "240_3_25%" "1800_1_25%" "1800_2_25%"
## [45] "1800_3_25%" "14400_1_25%" "14400_2_25%" "14400_3_25%"
## [49] "30_1_5%" "30_2_5%" "30_3_5%" "240_1_5%"
## [53] "240_2_5%" "240_3_5%" "1800_1_5%" "1800_2_5%"
## [57] "1800_3_5%" "14400_1_5%" "14400_2_5%" "14400_3_5%"
## [61] "30_1_W169G" "30_2_W169G" "30_3_W169G" "240_1_W169G"
## [65] "240_2_W169G" "240_3_W169G" "1800_1_W169G" "1800_2_W169G"
## [69] "1800_3_W169G" "14400_1_W169G" "14400_2_W169G" "14400_3_W169G"
## [73] "30_1_WT Null" "30_2_WT Null" "30_3_WT Null" "30_4_WT Null"
## [77] "30_5_WT Null" "30_6_WT Null" "30_7_WT Null" "240_1_WT Null"
## [81] "240_2_WT Null" "240_3_WT Null" "240_4_WT Null" "240_5_WT Null"
## [85] "240_6_WT Null" "240_7_WT Null" "1800_1_WT Null" "1800_2_WT Null"
## [89] "1800_3_WT Null" "1800_4_WT Null" "1800_5_WT Null" "1800_6_WT Null"
## [93] "1800_7_WT Null" "14400_1_WT Null" "14400_2_WT Null" "14400_3_WT Null"
## [97] "14400_4_WT Null" "14400_5_WT Null" "14400_6_WT Null" "14400_7_WT Null"
# Build design names in the format X(time)rep(replicate)cond(condition).
# Column names are "time_replicate_condition"; an anchored regex captures
# the time and replicate fields explicitly. (The previous gsub("0_", "0rep")
# substitution only worked because every timepoint here happens to end in 0.)
new.colnames <- paste0("X", colnames(MBP_wide)[-c(1, 2)])
new.colnames <- sub("^X(\\d+)_(\\d+)_", "X\\1rep\\2cond", new.colnames)
# remove annoying % signs
new.colnames <- gsub("%", "", new.colnames, fixed = TRUE)
# drop everything after a space ("WT Null" -> "WT"; NULL could get confusing
# later and WT is clear)
new.colnames <- sub(" .*", "", new.colnames)
new.colnames
## [1] "X30rep1cond10" "X30rep2cond10" "X30rep3cond10"
## [4] "X240rep1cond10" "X240rep2cond10" "X240rep3cond10"
## [7] "X1800rep1cond10" "X1800rep2cond10" "X1800rep3cond10"
## [10] "X14400rep1cond10" "X14400rep2cond10" "X14400rep3cond10"
## [13] "X30rep1cond15" "X30rep2cond15" "X30rep3cond15"
## [16] "X240rep1cond15" "X240rep2cond15" "X240rep3cond15"
## [19] "X1800rep1cond15" "X1800rep2cond15" "X1800rep3cond15"
## [22] "X14400rep1cond15" "X14400rep2cond15" "X14400rep3cond15"
## [25] "X30rep1cond20" "X30rep2cond20" "X30rep3cond20"
## [28] "X240rep1cond20" "X240rep2cond20" "X240rep3cond20"
## [31] "X1800rep1cond20" "X1800rep2cond20" "X1800rep3cond20"
## [34] "X14400rep1cond20" "X14400rep2cond20" "X14400rep3cond20"
## [37] "X30rep1cond25" "X30rep2cond25" "X30rep3cond25"
## [40] "X240rep1cond25" "X240rep2cond25" "X240rep3cond25"
## [43] "X1800rep1cond25" "X1800rep2cond25" "X1800rep3cond25"
## [46] "X14400rep1cond25" "X14400rep2cond25" "X14400rep3cond25"
## [49] "X30rep1cond5" "X30rep2cond5" "X30rep3cond5"
## [52] "X240rep1cond5" "X240rep2cond5" "X240rep3cond5"
## [55] "X1800rep1cond5" "X1800rep2cond5" "X1800rep3cond5"
## [58] "X14400rep1cond5" "X14400rep2cond5" "X14400rep3cond5"
## [61] "X30rep1condW169G" "X30rep2condW169G" "X30rep3condW169G"
## [64] "X240rep1condW169G" "X240rep2condW169G" "X240rep3condW169G"
## [67] "X1800rep1condW169G" "X1800rep2condW169G" "X1800rep3condW169G"
## [70] "X14400rep1condW169G" "X14400rep2condW169G" "X14400rep3condW169G"
## [73] "X30rep1condWT" "X30rep2condWT" "X30rep3condWT"
## [76] "X30rep4condWT" "X30rep5condWT" "X30rep6condWT"
## [79] "X30rep7condWT" "X240rep1condWT" "X240rep2condWT"
## [82] "X240rep3condWT" "X240rep4condWT" "X240rep5condWT"
## [85] "X240rep6condWT" "X240rep7condWT" "X1800rep1condWT"
## [88] "X1800rep2condWT" "X1800rep3condWT" "X1800rep4condWT"
## [91] "X1800rep5condWT" "X1800rep6condWT" "X1800rep7condWT"
## [94] "X14400rep1condWT" "X14400rep2condWT" "X14400rep3condWT"
## [97] "X14400rep4condWT" "X14400rep5condWT" "X14400rep6condWT"
## [100] "X14400rep7condWT"
We will now parse the data into an object of class QFeatures, we have provided
a function to assist with this in the package. If you want to do this yourself
use the readQFeatures function from the QFeatures package.
# Parse the wide deuterium table into a QFeatures object.
# quantcol = 3:102 selects the quantitative columns (everything except the
# peptide sequence and charge columns); `design` carries the experimental
# design encoded in the new column names.
MBPqDF <- parseDeutData(object = DataFrame(MBP_wide),
design = new.colnames,
quantcol = 3:102)
Analysis of a typical HDX-MS experiment
We have seen the basic aspects of our functional modelling approach. We now
wish to roll out our method across all peptides in the experiment. The
fitUptakeKinetics function allows us to apply our modelling approach across
all the peptides in the experiment. We need to provide a QFeatures object
and the features for which we are fitting the model. The design will be extracted
from the column names, or you can provide a design yourself. The parameter
initialisation should also be provided. Sometimes the model cannot be fit on the
kinetics. This is either because there is not enough data or through lack of
convergence. An error will be reported in these cases but this should not
perturb the user. You may wish to try a few starting values if there are
excessive models that fail fitting.
# Fit the uptake kinetics model across the first 24 samples, using the
# first feature set and user-supplied starting values for the parameters.
res <- fitUptakeKinetics(
  object = MBPqDF[, 1:24],
  feature = rownames(MBPqDF[, 1:24])[[1]],
  start = list(a = NULL, b = 0.001, d = NULL, p = 1)
)
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "model fit failed, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "model fit failed, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "model fit failed, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "Could not fit model, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "model fit failed, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "model fit failed, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "model fit failed, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "model fit failed, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "Could not fit model, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "model fit failed, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "model fit failed, likely exessive missing values"
The code chunk above returns an object of class HdxStatModels, indicating that a number
of models for the peptides have been fitted. This is simply a holder for a list
of HdxStatModel instances.
res
## Object of class "HdxStatModels"
## Number of models 104
We can easily examine individual fits by going to the underlying HdxStatModel
class:
res@statmodels[[1]]@vis + scale_color_manual(values = brewer.pal(n = 2, name = "Set2"))
## Warning in brewer.pal(n = 2, name = "Set2"): minimal value for n is 3, returning requested palette with 3 different levels
## Scale for 'colour' is already present. Adding another scale for 'colour',
## which will replace the existing scale.
We now wish to apply statistical analysis to these fitted curves. Our approach
is an empirical Bayes testing procedure, which borrows information across peptides
to stabilise variance estimates. Here, we need to provide the original data
that was analysed and the HdxStatModels class. The following code chunk
returns an object of class HdxStatRes. This object tells us that statistical
analysis was performed using our Functional model.
# Apply the empirical Bayes testing procedure to the fitted curves,
# using the same 24 samples the models were fitted on.
out <- processFunctional(object = MBPqDF[,1:24], params = res)
out
## Object of class "HdxStatRes"
## Analysed using Functional model
The main slot of interest is the results slot, which returns quantities of
interest such as p-values and FDR-corrected p-values that account for multiple testing.
The following is the DataFrame of interest.
out@results
## DataFrame with 104 rows and 8 columns
## Fstat.Fstat Fstat.numerator Fstat.denomenator
## <list> <list> <list>
## VIWINGDKGYNG_2 0.70875 0.00109168 0.00154029
## VIWINGDKGYNGL_2 0.0329098 6.18014e-05 0.0018779
## GDKGYNGLAEVG_3 0.980103 0.00129713 0.00132346
## LAEVGKKFEKDTGIKVTVEHPDK_4 0.809885 0.00117025 0.00144496
## AEVGKKFEKDTGIKV_4 0.806449 0.00140588 0.00174329
## ... ... ... ...
## NAQKGEIMPNIPQM_2 1.74802 0.00425775 0.00243575
## NAQKGEIMPNIPQMSA_2 5.50307 0.013198 0.00239829
## NAQKGEIMPNIPQMSAF_2 7.06683 0.00431371 0.000610416
## PNIPQMSAF_1 0.905875 0.00277575 0.00306417
## SAFWYAVRTAVINAA_4 0.364667 0.00215098 0.00589847
## pvals fdr ebayes.pvals ebayes.fdr
## <numeric> <numeric> <numeric> <numeric>
## VIWINGDKGYNG_2 0.597687 0.999959 0.584744 0.999952
## VIWINGDKGYNGL_2 0.997692 0.999959 0.997487 0.999952
## GDKGYNGLAEVG_3 0.445918 0.999959 0.438269 0.999952
## LAEVGKKFEKDTGIKVTVEHPDK_4 0.536935 0.999959 0.525822 0.999952
## AEVGKKFEKDTGIKV_4 0.538920 0.999959 0.520828 0.999952
## ... ... ... ... ...
## NAQKGEIMPNIPQM_2 0.18879415 0.6333739 0.16773031 0.6229983
## NAQKGEIMPNIPQMSA_2 0.00554988 0.0443991 0.00430216 0.0394778
## NAQKGEIMPNIPQMSAF_2 0.00177921 0.0231297 0.00269021 0.0378613
## PNIPQMSAF_1 0.48384985 0.9999586 0.44999538 0.9999517
## SAFWYAVRTAVINAA_4 0.83017683 0.9999586 0.80620115 0.9999517
## fitcomplete
## <integer>
## VIWINGDKGYNG_2 1
## VIWINGDKGYNGL_2 2
## GDKGYNGLAEVG_3 3
## LAEVGKKFEKDTGIKVTVEHPDK_4 4
## AEVGKKFEKDTGIKV_4 5
## ... ...
## NAQKGEIMPNIPQM_2 100
## NAQKGEIMPNIPQMSA_2 101
## NAQKGEIMPNIPQMSAF_2 102
## PNIPQMSAF_1 103
## SAFWYAVRTAVINAA_4 104
We can now examine the peptides for which the false discovery rate is less
than 0.05
which(out@results$ebayes.fdr < 0.05)
## AADGGYAFKYENGKY_3 FKYENGKY_3 DIKDVGVDNAGAKAGL_2
## 42 45 47
## VGVDNAGAKAGLTF_2 VDLIKNKHMNA_4 VDLIKNKHMNADTD_3
## 50 55 56
## VDLIKNKHMNADTDY_4 IDTSKVNY_2 AKDPRIAATM_3
## 57 68 96
## ENAQKGEIMPNIPQMSAF_2 NAQKGEIMPNIPQMSA_2 NAQKGEIMPNIPQMSAF_2
## 99 101 102
Let us visualise some of these examples:
res@statmodels[[42]]@vis + res@statmodels[[45]]@vis
As we can see, our model has picked up some subtle differences; we can further
visualise these using a forest plot. We can see that the functions are very similar,
as the parameters are almost identical (a, b, p, d). However, we can see that
the deuterium differences are lower in the 10% structural variant condition.
fp <- forestPlot(params = res@statmodels[[42]])
We can produce a table of the actual numbers. We see that at all 4 timepoints
the deuterium difference is negative, though the confidence intervals overlap
with 0. Our functional approach is picking up this small but reproducible difference.
knitr::kable(fp$data)
Let us now have a look at a situation where the changes are more dramatic.
# Fit the uptake kinetics model across samples 61:100 (W169G and WT),
# with the same starting values as before.
res_wt <- fitUptakeKinetics(
  object = MBPqDF[, 61:100],
  feature = rownames(MBPqDF[, 61:100])[[1]],
  start = list(a = NULL, b = 0.001, d = NULL, p = 1)
)
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "Could not fit model, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "Could not fit model, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "model fit failed, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "model fit failed, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "Could not fit model, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "Could not fit model, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "Could not fit model, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "Could not fit model, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "Could not fit model, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "Could not fit model, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "Could not fit model, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "Could not fit model, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "Could not fit model, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "model fit failed, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "model fit failed, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "Could not fit model, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "Could not fit model, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "Could not fit model, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "Could not fit model, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "Could not fit model, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "Could not fit model, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "model fit failed, likely exessive missing values"
## Error in nlsModel(formula, mf, start, wts) :
## singular gradient matrix at initial parameter estimates
## [1] "model fit failed, likely exessive missing values"
out_wt <- processFunctional(object = MBPqDF[, c(61:100)], params = res_wt)
We can visualise some of the results and generate plots.
# Compose the kinetics plots for peptides 27 and 28 (stacked with `/`)
# next to their forest plots (via `|`), collecting shared legends and
# tagging the panels with letters — all via patchwork operators.
res_wt@statmodels[[27]]@vis/res_wt@statmodels[[28]]@vis + plot_layout(guides = "collect")|(forestPlot(params = res_wt@statmodels[[27]], condition = c("WT", "W169G"))/forestPlot(params = res_wt@statmodels[[28]], condition = c("WT", "W169G")) + plot_layout(guides = "collect")) +
plot_annotation(tag_levels = 'a') + plot_layout(widths = c(1, 1))


# An epitope mapping experiment
We now describe the analysis of an epitope mapping experiment. Here, the data
analysis is more challenging, since only one replicate was performed in each
condition, apo and antibody. If we make some simplifying assumptions, rigorous
statistical analysis can still be performed.
The experiment was performed on HOIP-RBR; we load the data below from inside
the package.
# Locate the HOIP-RBR state data shipped with the hdxstats package and read it.
HOIPpath <- system.file("extdata", "N64184_1a2_state.csv", package = "hdxstats")
HOIP <- read.csv(HOIPpath)
# Inspect the distinct experimental states present in the data.
unique(HOIP$State)
## [1] "apo" "dAb13_1" "dAb13_2" "dAb25_1" "dAb25_2" "dAb27_1" "dAb27_2"
## [8] "dAb2_1" "dAb2_2" "dAb6_1" "dAb6_2"
HOIP$Exposure <- HOIP$Exposure * 60 #convert to seconds
# Plot the uptake kinetics of the first peptide across all states.
# Note: unique(HOIP$Sequence)[1] (first unique sequence) — the original
# unique(HOIP$Sequence[1]) gave the same value but obscured the intent.
filter(HOIP, Sequence == unique(HOIP$Sequence)[1]) %>%
  ggplot(aes(x = Exposure,
             y = Center,
             color = factor(State, unique(HOIP$State)))) +
  theme_classic() + geom_point(size = 3) +
  scale_color_manual(values = colorRampPalette(brewer.pal(8, name = "Set2"))(11)) +
  # fixed label typo: "incoperation" -> "incorporation"
  labs(color = "experiment", x = "Deuterium Exposure", y = "Deuterium incorporation")

As before, we need to convert the data to an object of class QFeatures
for ease of analysis.
First, we put the data into a DataFrame object. Currently, it is in long format,
so we switch to a wide format.
# Reshape HOIP to wide format: one row per peptide Sequence, one column
# per Exposure/State combination, filled with the Center values.
HOIP_wide <- pivot_wider(
  data.frame(HOIP),
  id_cols = c("Sequence"),
  names_from = c("Exposure", "State"),
  values_from = Center
)
Now remove all columns with only NAs
HOIP_wide <- HOIP_wide[, colSums(is.na(HOIP_wide)) != nrow(HOIP_wide)]
The column names are not very informative; provide them in the format X(time)rep(replicate)cond(condition).
colnames(HOIP_wide)[-c(1)]
## [1] "0_apo" "30_apo" "300_apo" "0_dAb13_1" "30_dAb13_1"
## [6] "300_dAb13_1" "0_dAb13_2" "30_dAb13_2" "300_dAb13_2" "0_dAb25_1"
## [11] "30_dAb25_1" "300_dAb25_1" "0_dAb25_2" "30_dAb25_2" "300_dAb25_2"
## [16] "0_dAb27_1" "30_dAb27_1" "300_dAb27_1" "0_dAb27_2" "30_dAb27_2"
## [21] "300_dAb27_2" "0_dAb2_1" "30_dAb2_1" "300_dAb2_1" "0_dAb2_2"
## [26] "30_dAb2_2" "300_dAb2_2" "0_dAb6_1" "30_dAb6_1" "300_dAb6_1"
## [31] "0_dAb6_2" "30_dAb6_2" "300_dAb6_2"
# Build design names in the format X(time)rep(replicate)cond(condition).
# Each HOIP column is "time_state" with a single replicate, so the replicate
# is always 1. An anchored regex works for any timepoint (the previous
# gsub("0_", "0rep1") trick assumed every timepoint ends in 0).
new.colnames <- paste0("X", colnames(HOIP_wide)[-1])
new.colnames <- sub("^X(\\d+)_", "X\\1rep1cond", new.colnames)
# remove annoying % signs (none expected here; kept for safety)
new.colnames <- gsub("%", "", new.colnames, fixed = TRUE)
# drop everything after a space (NULL could get confusing later and WT is clear)
new.colnames <- sub(" .*", "", new.colnames)
Now, we can provide rownames and convert the data to a QFeatures object:
# Parse the wide HOIP table into a QFeatures object; quantcol = 2:34 are
# the quantitative columns and the peptide sequences become rownames.
qDF <- parseDeutData(object = DataFrame(HOIP_wide),
design = new.colnames,
quantcol = 2:34,
rownames = HOIP_wide$Sequence)
As before, we can produce a heatmap, we perform a simple normalisation for
ease of visualisation:
# Extract the assay matrix and normalise by subtracting the first
# (time-zero) column from every column. A single vectorised matrix-vector
# subtraction replaces the previous apply() loop, which re-read
# assay(qDF)[, 1] once per column; the result is identical.
mat <- assay(qDF)
mat <- mat - mat[, 1]
pheatmap(t(mat),
cluster_rows = FALSE,
cluster_cols = FALSE,
color = brewer.pal(n = 9, name = "BuPu"),
main = "HOIP RBR heatmap",
fontsize = 14,
legend_breaks = c(0, 2, 4, 6,8,10,12, max(assay(qDF))),
legend_labels = c("0", "2", "4", "6", "8","10", "12", "Incorporation"))

Let us first perform a quick test:
# Quick differential test on the third peptide, fitting the kinetic model
# value ~ a * (1 - exp(-b * timepoint)) + d with the given starting values.
res <- differentialUptakeKinetics(object = qDF[,1:33],
feature = rownames(qDF)[[1]][3],
start = list(a = NULL, b = 0.01, d = NULL),
formula = value ~ a * (1 - exp(-b*(timepoint))) + d)
# Recolour the fitted-curve visualisation with the earlier palette.
res@vis+ scale_color_manual(values = colorRampPalette(brewer.pal(8, name = "Set2"))(11))
## Scale for 'colour' is already present. Adding another scale for 'colour',
## which will replace the existing scale.

Whilst this analysis performs good fits for the functions, there are too many
degrees of freedom to perform sound statistical analysis. Hence, we normalize
to remove the degree of freedom for the intercept. For simplicity and to preserve
the original matrix, we reprocess the data. We then fit a simplified kinetic
model, where only the plateau is inferred.
# Restrict the design to the apo and dAb25_1 samples (columns 1:3 and 10:12).
cn <- new.colnames[c(1:3, 10:12)]
# Work on a copy so the original wide table is preserved.
HOIP_wide_nrm <- data.frame(HOIP_wide)
# Normalise each condition by its own time-zero (intercept) column.
apo_intercept <- HOIP_wide_nrm[, 2]
HOIP_wide_nrm[, 2:4] <- HOIP_wide_nrm[, 2:4] - apo_intercept
dab_intercept <- HOIP_wide_nrm[, 11]
HOIP_wide_nrm[, 11:13] <- HOIP_wide_nrm[, 11:13] - dab_intercept
# Rebuild the QFeatures object from the normalised data.
newqDF <- parseDeutData(
  object = DataFrame(HOIP_wide_nrm),
  design = cn,
  quantcol = c(2:4, 11:13),
  rownames = HOIP_wide$Sequence
)
# Fit the simplified kinetic model (fixed rate 0.05; only the plateau `a`
# is inferred) across all peptides, then run the statistical analysis.
res_all <- fitUptakeKinetics(
  object = newqDF[, 1:6],
  feature = rownames(newqDF[, 1:6])[[1]],
  start = list(a = NULL),
  formula = value ~ a * (1 - exp(-0.05*(timepoint)))
)
funresdAb25_1 <- processFunctional(
  object = newqDF[, 1:6],
  params = res_all
)
We can have a look at the results:
funresdAb25_1@results
## DataFrame with 110 rows and 8 columns
## Fstat.Fstat Fstat.numerator Fstat.denomenator
## <list> <list> <list>
## GPGQECA 0.505943 0.0236133 0.0466718
## CAVCGWALPHNRM 0.819962 0.0111598 0.0136102
## CAVCGWALPHNRMQAL 3.18841 0.130744 0.0410059
## CAVCGWALPHNRMQALTSCE 1.51203 0.598316 0.395703
## AVCGWALPHNRM 0.138914 0.00401256 0.0288852
## ... ... ... ...
## ATERYLHVRPQPLAGEDPPAYQARL 0.882916 0.0634907 0.0719102
## ERYLHVRPQPLAGEDPPAYQ 0.841277 0.0671491 0.0798181
## ERYLHVRPQPLAGEDPPAYQARL 2.11026 0.172771 0.081872
## RYLHVRPQPLAGEDPPAYQARL 1.65969 0.120275 0.0724686
## LQKLTEEVPLGQSIPRRRK 4.48223 0.196504 0.0438406
## pvals fdr ebayes.pvals ebayes.fdr
## <numeric> <numeric> <numeric> <numeric>
## GPGQECA 0.516181 0.652643 0.479007 0.612683
## CAVCGWALPHNRM 0.416403 0.558589 0.406959 0.545920
## CAVCGWALPHNRMQAL 0.148709 0.320744 0.123017 0.300707
## CAVCGWALPHNRMQALTSCE 0.286210 0.499732 0.237829 0.441045
## AVCGWALPHNRM 0.728272 0.801099 0.708845 0.779730
## ... ... ... ... ...
## ATERYLHVRPQPLAGEDPPAYQARL 0.400605 0.557804 0.3563859 0.502596
## ERYLHVRPQPLAGEDPPAYQ 0.410930 0.558053 0.3658748 0.509446
## ERYLHVRPQPLAGEDPPAYQARL 0.219967 0.443454 0.1817334 0.386882
## RYLHVRPQPLAGEDPPAYQARL 0.267115 0.481684 0.2263725 0.436859
## LQKLTEEVPLGQSIPRRRK 0.101671 0.266280 0.0814249 0.246599
## fitcomplete
## <integer>
## GPGQECA 1
## CAVCGWALPHNRM 2
## CAVCGWALPHNRMQAL 3
## CAVCGWALPHNRMQALTSCE 4
## AVCGWALPHNRM 5
## ... ...
## ATERYLHVRPQPLAGEDPPAYQARL 106
## ERYLHVRPQPLAGEDPPAYQ 107
## ERYLHVRPQPLAGEDPPAYQARL 108
## RYLHVRPQPLAGEDPPAYQARL 109
## LQKLTEEVPLGQSIPRRRK 110
which(funresdAb25_1@results$ebayes.fdr < 0.05)
## IQLRESLEPDA RESLEPDAYALFHKKLTEGVL YALFHKKLTEGVL
## 36 42 43
## REQLEATCPQCHQTF EATCPQCHQTF MYLQENGIDCPKCKF
## 52 53 65
## YLQENGIDCPKCKFSYA LQENGIDCPKCKFSYA
## 68 70
We can plot these kinetics to see what is happening. This allows us to visualise
regions of protection and deprotection, potentially identifying the epitope.
# Plot the kinetics of the significant peptides side by side (patchwork),
# collecting a single shared legend.
(res_all@statmodels[[36]]@vis +
res_all@statmodels[[42]]@vis +
res_all@statmodels[[43]]@vis +
res_all@statmodels[[65]]@vis +
res_all@statmodels[[68]]@vis +
res_all@statmodels[[70]]@vis +
res_all@statmodels[[52]]@vis +
res_all@statmodels[[53]]@vis ) + plot_layout(guides = 'collect')
We can make a Manhattan plot to better spatially visualise what's happening.
# We need to provide an indication of "difference" so we can examine
# deprotected or protected regions: antibody (column 6) minus apo (column 3)
# at the final timepoint.
diffdata <- assay(newqDF)[,6] - assay(newqDF)[,3]
# Manhattan-style significance plot along the protein sequence, coloured
# by the sign of the deuterium difference.
sigplots <- manhattentplot(params = funresdAb25_1,
sequences = HOIP$Sequence,
region = HOIP[, c("Start", "End")],
difference = diffdata,
nrow = 1)
## Scale for 'x' is already present. Adding another scale for 'x', which will
## replace the existing scale.
sigplots[[1]] + plot_layout(guides = 'collect')
